Task Information

Total Number of Trials = 104

Sweet and bitter tastes were selected by each participant to reflect reward & punishment

Learning Curves for each Shape Pair

plot1
## Warning: Removed 2 rows containing missing values (geom_path).
## Warning: Removed 2 rows containing missing values (geom_point).

plot2
## Warning: Removed 3 rows containing missing values (geom_path).
## Warning: Removed 3 rows containing missing values (geom_point).

plot3

Learning Curves Together

plot4
## Warning: Removed 2 rows containing missing values (geom_path).
## Warning: Removed 3 rows containing missing values (geom_path).

Begin Processing Graphs for Each Participant

## Warning: package 'ggpubr' was built under R version 3.5.2
## Warning: package 'data.table' was built under R version 3.5.2

Plot “Heatmaps” of Outcomes During Training

# Recode the categorical trial outcome into a numeric score used as the
# heatmap fill: Miss = 0, punish = -10, reward = 10.
# Initialize the column to NA first: assigning into a column that does not
# yet exist via `[<-` emits an "Unknown or uninitialised column" warning.
mydata$outcome0 <- NA_real_
mydata$outcome0[mydata$outcome == "Miss"] <- 0
mydata$outcome0[mydata$outcome == "punish"] <- -10
mydata$outcome0[mydata$outcome == "reward"] <- 10

# Heatmap of training outcomes: trial number (x) by subject (y), tiles
# colored by the numeric outcome code (red = punish, green = reward,
# black = NA). Legend is titled "Outcome" via guides().
hmTOTAL <- ggplot(mydata, aes(as.numeric(Count), as.factor(sub_num), fill = outcome0)) +
  geom_tile() +
  scale_fill_gradient2(low = "red", high = "green", na.value = "black", name = "") +
  theme_classic() +
  labs(x = "Trial", y = "Subject Number") +
  guides(fill = guide_legend(title = "Outcome"))
  # geom_point(aes(shape = as.factor(choice), size = 1, color = as.factor(choice)))

Plot “Heatmaps” of Outcomes During Training

-10 (red) = punishment (bitter taste)

0 (white) = missed press (no taste)

10 (green) = reward (sweet taste)

hmTOTAL

Overlay with Shape Selected & Outcome

hmTOTAL1
## Warning: The shape palette can deal with a maximum of 6 discrete values
## because more than 6 becomes difficult to discriminate; you have 7.
## Consider specifying shapes manually if you must have them.
## Warning: Removed 482 rows containing missing values (geom_point).

Split Into Groups Based on Posttest Performance

# Distribution of posttest reward sensitivity (NA's = 104 — presumably one
# unscored row per participant, matching the trial count; TODO confirm).
summary(data0$sensitivity_reward)
##    Min. 1st Qu.  Median    Mean 3rd Qu.    Max.    NA's 
##  0.2917  0.4444  0.5000  0.5096  0.5789  0.7500     104
# Bin participants into four learning groups at (rounded) quartile cut
# points of sensitivity_reward: 0.444 / 0.5 / 0.57. Rows with NA
# sensitivity stay NA in `learn`.
data0$learn[data0$sensitivity_reward < 0.444]<- "didn't learn"
data0$learn[data0$sensitivity_reward >= 0.444 & data0$sensitivity_reward < 0.5  ]<- "maybe learn"
data0$learn[ data0$sensitivity_reward >= 0.5 & data0$sensitivity_reward < 0.57 ]<- "ok"
data0$learn[ data0$sensitivity_reward >= 0.57]<- "pretty good"
# Trial counts per group (these are trial rows, not participants).
summary(as.factor(data0$learn))
## didn't learn  maybe learn           ok  pretty good         NA's 
##         2211         1541         2609         2643          104
# Build an outcome heatmap (trial x subject, fill = outcome0) for one
# subset of participants; shared by the "good" and "bad" posttest plots
# below, which were previously duplicated copy-paste chains.
# NOTE(review): outcome0 is created on mydata earlier — confirm data0
# also carries this column.
outcome_heatmap <- function(df) {
  ggplot(df, aes(as.numeric(Count), as.factor(sub_num), fill = outcome0)) +
    geom_tile() +
    scale_fill_gradient2(low = "red", high = "green", na.value = "black", name = "") +
    theme_classic() + xlab(label = "Trial") + ylab(label = "Subject Number") +
    guides(fill = guide_legend(title = "Outcome"))
    # geom_point(aes(shape = as.factor(choice), size = 1, color = as.factor(choice)))
}

# Heatmap for the best-performing posttest group.
hmTOTALgood <- outcome_heatmap(subset(data0, learn == "pretty good"))
#hmTOTALgood

# Heatmap for the worst-performing posttest group.
hmTOTALbad <- outcome_heatmap(subset(data0, learn == "didn't learn"))
#hmTOTALbad

# Stack the two heatmaps vertically for direct visual comparison.
test1 <- ggarrange(hmTOTALgood, hmTOTALbad,
          labels = c("Good Posttest", "Bad Posttest"),
          ncol = 1, nrow = 2)

Split Into Groups Based on Posttest Performance

test1

Plot “Heatmaps” of Choices During Training

-30 (pink) = Choose F (40% correct)

-20 (mid pink) = Choose D (30% correct)

-10 (light pink) = Choose B (20% correct)

0 (white) = missed press (no choice)

10 (light blue) = Choose E (60% correct)

20 (mid blue) = Choose C (70% correct)

30 (blue) = Choose A (80% correct)

test2

Are there differences in training between posttest groups?

# Contingency table: shape choice (A-F, Miss) by learning group.
mytable <- xtabs(~choice+learn, data=data0)
ftable(mytable) # print table 
##        learn didn't learn maybe learn  ok pretty good
## choice                                               
## A                     362         248 417         460
## B                     331         242 420         441
## C                     359         236 401         425
## D                     353         240 413         377
## E                     332         224 410         407
## F                     358         270 384         418
## Miss                  116          81 164         115
# Choice distribution does not differ by group (p = 0.17).
summary(mytable) # chi-square test of independence
## Call: xtabs(formula = ~choice + learn, data = data0)
## Number of cases in table: 9004 
## Number of factors: 2 
## Test for independence of all factors:
##  Chisq = 23.464, df = 18, p-value = 0.1734
# Contingency table: trial outcome (Miss/punish/reward) by learning group.
mytable <- xtabs(~outcome+learn, data=data0)
ftable(mytable) # print table 
##         learn didn't learn maybe learn   ok pretty good
## outcome                                                
## Miss                   116          81  164         115
## punish                1032         717 1218        1227
## reward                1063         743 1227        1301
# Outcome distribution does not differ by group (p = 0.10).
summary(mytable) # chi-square test of independence
## Call: xtabs(formula = ~outcome + learn, data = data0)
## Number of cases in table: 9004 
## Number of factors: 2 
## Test for independence of all factors:
##  Chisq = 10.656, df = 6, p-value = 0.09961
# Contingency table: congruency (matched/mismatched/Miss) by learning group.
mytable <- xtabs(~congruent+learn, data=data0)
ftable(mytable) # print table 
##            learn didn't learn maybe learn   ok pretty good
## congruent                                                 
## matched                  1489         997 1734        1777
## mismatched                606         463  711         751
## Miss                      116          81  164         115
# Congruency DOES differ by group (p = 0.033) — the only significant test.
summary(mytable) # chi-square test of independence
## Call: xtabs(formula = ~congruent + learn, data = data0)
## Number of cases in table: 9004 
## Number of factors: 2 
## Test for independence of all factors:
##  Chisq = 13.713, df = 6, p-value = 0.03301

Aha moment

There is a difference between the number of mismatched trials in the “learners” and “non-learners.” Those who “don’t learn” have more mismatches.